xen: do not free reserved memory into heap
authorPenny Zheng <Penny.Zheng@arm.com>
Tue, 6 Sep 2022 07:39:14 +0000 (15:39 +0800)
committerJulien Grall <jgrall@amazon.com>
Tue, 6 Sep 2022 16:53:34 +0000 (17:53 +0100)
Pages used as guest RAM for a static domain shall be reserved to that
domain only. So, to prevent reserved pages from being put to any other
use, they shall not be freed back to the heap, even when the last
reference is dropped.
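
A minimal sketch of the resulting last-reference path in put_page()
(it mirrors the xen/arch/arm/mm.c hunk below; the surrounding refcount
handling is elided):

    if ( unlikely((nx & PGC_count_mask) == 0) )
    {
        /* A page marked PGC_static must never reach the domheap. */
        if ( unlikely(nx & PGC_static) )
            free_domstatic_page(page);  /* stays reserved to its domain */
        else
            free_domheap_page(page);    /* ordinary page: back to heap  */
    }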

This commit introduces a new helper, free_domstatic_page, to free
static pages at runtime. Since free_staticmem_pages is now called by it
at runtime, drop its __init attribute.
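
For reference, the runtime free path of the new helper, as a sketch
that mirrors the common/page_alloc.c hunk below with the steps
annotated:

    void free_domstatic_page(struct page_info *page)
    {
        struct domain *d = page_get_owner(page); /* must be owned */
        bool drop_dom_ref;

        /* May nest via relinquish_memory(), hence the recursive lock. */
        spin_lock_recursive(&d->page_alloc_lock);
        arch_free_heap_page(d, page);            /* unlink from the domain's page list */
        drop_dom_ref = !domain_adjust_tot_pages(d, -1);
        spin_unlock_recursive(&d->page_alloc_lock);

        /* Mark free (scrubbing if needed) while keeping PGC_static set. */
        free_staticmem_pages(page, 1, scrub_debug);

        if ( drop_dom_ref )
            put_domain(d);
    }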

Wrapping the function declarations (free_staticmem_pages,
free_domstatic_page, etc.) in #ifdef CONFIG_STATIC_MEMORY is redundant,
so remove it here.
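
Note that, with CONFIG_STATIC_MEMORY=n, the new #else branch defines
PGC_static as 0, so a guard such as the one in put_page() is
constant-false and the compiler drops the call as dead code, which is
why the now unconditional declarations do not cause link failures
(sketch):

    #else
    #define PGC_static     0           /* flag can never be set       */
    #endif

    ...
    if ( unlikely(nx & PGC_static) )    /* constant-false when 0       */
        free_domstatic_page(page);      /* eliminated as dead code     */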

Signed-off-by: Penny Zheng <penny.zheng@arm.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Julien Grall <jgrall@amazon.com>
xen/arch/arm/include/asm/mm.h
xen/arch/arm/mm.c
xen/common/page_alloc.c
xen/include/xen/mm.h

index da25251cda36e53714e13759431a74e74c59e10c..749fbefa0cab1671f6a24e7f1355b337be1d9b9e 100644 (file)
@@ -121,9 +121,13 @@ struct page_info
   /* Page is Xen heap? */
 #define _PGC_xen_heap     PG_shift(2)
 #define PGC_xen_heap      PG_mask(1, 2)
-  /* Page is static memory */
+#ifdef CONFIG_STATIC_MEMORY
+/* Page is static memory */
 #define _PGC_static    PG_shift(3)
 #define PGC_static     PG_mask(1, 3)
+#else
+#define PGC_static     0
+#endif
 /* ... */
 /* Page is broken? */
 #define _PGC_broken       PG_shift(7)
index c81c706c8b2375b7b5eb53a28302061e42f6125f..7f5b317d3e5d89586229e28eef0da23172843d4b 100644 (file)
@@ -1496,7 +1496,10 @@ void put_page(struct page_info *page)
 
     if ( unlikely((nx & PGC_count_mask) == 0) )
     {
-        free_domheap_page(page);
+        if ( unlikely(nx & PGC_static) )
+            free_domstatic_page(page);
+        else
+            free_domheap_page(page);
     }
 }
 
index bfd4150be766afb7130d8b2ecb175716578d1db0..0c50dee4c59cecc4c01f00feae1726880c76743c 100644 (file)
@@ -2694,12 +2694,14 @@ struct domain *get_pg_owner(domid_t domid)
 
 #ifdef CONFIG_STATIC_MEMORY
 /* Equivalent of free_heap_pages to free nr_mfns pages of static memory. */
-void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
-                                 bool need_scrub)
+void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
+                          bool need_scrub)
 {
     mfn_t mfn = page_to_mfn(pg);
     unsigned long i;
 
+    spin_lock(&heap_lock);
+
     for ( i = 0; i < nr_mfns; i++ )
     {
         mark_page_free(&pg[i], mfn_add(mfn, i));
@@ -2710,9 +2712,41 @@ void __init free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
             scrub_one_page(pg);
         }
 
-        /* In case initializing page of static memory, mark it PGC_static. */
         pg[i].count_info |= PGC_static;
     }
+
+    spin_unlock(&heap_lock);
+}
+
+void free_domstatic_page(struct page_info *page)
+{
+    struct domain *d = page_get_owner(page);
+    bool drop_dom_ref;
+
+    if ( unlikely(!d) )
+    {
+        printk(XENLOG_G_ERR
+               "The about-to-free static page %"PRI_mfn" must be owned by a domain\n",
+               mfn_x(page_to_mfn(page)));
+        ASSERT_UNREACHABLE();
+        return;
+    }
+
+    ASSERT_ALLOC_CONTEXT();
+
+    /* NB. May recursively lock from relinquish_memory(). */
+    spin_lock_recursive(&d->page_alloc_lock);
+
+    arch_free_heap_page(d, page);
+
+    drop_dom_ref = !domain_adjust_tot_pages(d, -1);
+
+    spin_unlock_recursive(&d->page_alloc_lock);
+
+    free_staticmem_pages(page, 1, scrub_debug);
+
+    if ( drop_dom_ref )
+        put_domain(d);
 }
 
 /*
index 35b065146f23cdc59343febd5576568646790dd5..deadf4b2a1f74308ff73ba00573e223f02378478 100644 (file)
@@ -85,13 +85,12 @@ bool scrub_free_pages(void);
 } while ( false )
 #define FREE_XENHEAP_PAGE(p) FREE_XENHEAP_PAGES(p, 0)
 
-#ifdef CONFIG_STATIC_MEMORY
 /* These functions are for static memory */
 void free_staticmem_pages(struct page_info *pg, unsigned long nr_mfns,
                           bool need_scrub);
+void free_domstatic_page(struct page_info *page);
 int acquire_domstatic_pages(struct domain *d, mfn_t smfn, unsigned int nr_mfns,
                             unsigned int memflags);
-#endif
 
 /* Map machine page range in Xen virtual address space. */
 int map_pages_to_xen(